In [1]:
import sys
import os
sys.path.insert(0, os.path.abspath('../'))
In [2]:
import math
import qiskit

from qiskit import Aer
from qiskit.ignis.mitigation import CompleteMeasFitter
from qiskit.utils import QuantumInstance
from qiskit_optimization.algorithms import MinimumEigenOptimizer
from qiskit.algorithms import QAOA
from qiskit.algorithms.optimizers import SPSA, COBYLA
from qiskit.providers.aer.noise import NoiseModel

from shared.graph_helper import *
from shared.QiskitMaxcut import *

%matplotlib inline
In [3]:
provider = qiskit.IBMQ.load_account()
provider = qiskit.IBMQ.get_provider(hub='ibm-q-unibw', group='hochschule-muc', project='masterarbeit')
In [4]:
# ---- Define graph and MaxCut ----
graph = generate_butterfly_graph(with_weights=True)
max_cut = Maxcut(graph)
max_cut_qubo = max_cut.to_qubo()
max_cut.draw()
In [5]:
MAX_ITER = 10
optimizers = {
   # "ADAM": ADAM(maxiter=MAX_ITER), # benötigt extrem lange 
   # "AQGD": AQGD(maxiter=MAX_ITER), # benötigt extrem lange 
    "SPSA": SPSA(maxiter=MAX_ITER),
    #"QNSPSA": QNSPSA(maxiter=MAX_ITER), # fidelity muss gesetzt werden
    "COBYLA": COBYLA(maxiter=MAX_ITER),
    #"NELDER_MEAD": NELDER_MEAD(maxiter=MAX_ITER),
}
REPS_MAX = 10
REPS_MIN = 1
GAMMA_MIN = -math.pi
GAMMA_MAX = math.pi
BETA_MIN = -math.pi
BETA_MAX = math.pi
N_TRIALS = 2

# Noise 
BACKEND_NAME = 'ibmq_montreal'
noise_backend = provider.get_backend(BACKEND_NAME)
noise_model = NoiseModel.from_backend(noise_backend)
coupling_map = noise_backend.configuration().coupling_map
basis_gates = noise_model.basis_gates

SHOTS=8000
In [6]:
def init_qaoa(optimizer, reps, init_parameters):

    quantum_instance = QuantumInstance( backend=Aer.get_backend('qasm_simulator'), 
                                        shots=SHOTS,
                                        noise_model=noise_model,
                                        coupling_map=coupling_map,
                                        basis_gates=basis_gates,
                                        measurement_error_mitigation_cls=CompleteMeasFitter,
                                        measurement_error_mitigation_shots=SHOTS
                                      )                 
    
    qaoa = QAOA(optimizer=optimizer, 
                quantum_instance=quantum_instance, 
                reps=reps,
                initial_point=init_parameters) 
    
    return qaoa
In [7]:
def run(max_cut_problem, qaoa):
    # Run quantum algorithm QAOA
    algorithm = MinimumEigenOptimizer(qaoa)
    result = algorithm.solve(max_cut_problem)
    optimal_parameters = qaoa.optimal_params
    
    return result, optimal_parameters.copy() 
In [8]:
def objective(trial):
    optimizer_name = trial.suggest_categorical("optimizer", optimizers.keys())
    reps = trial.suggest_int('reps', REPS_MIN, REPS_MAX + 1)
    parameters = []
    for i in range(reps):
        parameters.append(trial.suggest_uniform(f'gamma_{i}', GAMMA_MIN, GAMMA_MAX))

    for i in range(reps):
        parameters.append(trial.suggest_uniform(f'beta_{i}', BETA_MIN, BETA_MAX))

    optimizer = optimizers[optimizer_name]
        
    qaoa = init_qaoa(optimizer, reps, parameters)
    
    result, optimal_parameters = run(max_cut_qubo, qaoa)
    
    mean, distribution = max_cut.analyse(result)
    trial.set_user_attr(key="best", value=[mean,result,optimal_parameters])   
    
    return -mean
In [9]:
def save_best_trial(study, trial):
    if study.best_trial.number == trial.number:
        mean, result, optimal_parameters = trial.user_attrs["best"]
        result_copy = OptimizationResult(result.x.copy(), result.fval.copy(), result.variables.copy(), status=result.status, samples=result.samples.copy())  
        study.set_user_attr(key="best", value=[mean,result_copy, optimal_parameters.copy()])
In [10]:
import optuna
study = optuna.create_study(direction='minimize')
study.optimize(objective, n_trials=N_TRIALS, callbacks=[save_best_trial])
[I 2021-07-27 10:12:17,934] A new study created in memory with name: no-name-90b88c87-41d3-4aae-bc36-34e142065c7e
[I 2021-07-27 10:12:44,461] Trial 0 finished with value: 24.17402862846094 and parameters: {'optimizer': 'SPSA', 'reps': 8, 'gamma_0': 0.5222677695858913, 'gamma_1': 0.6030370071384681, 'gamma_2': -0.9171256523314404, 'gamma_3': -1.713428668137644, 'gamma_4': 1.677790142634417, 'gamma_5': -1.2972220670579928, 'gamma_6': -1.2330403632111295, 'gamma_7': 1.9931784259499876, 'beta_0': 0.7968545744584206, 'beta_1': -1.9904907816769017, 'beta_2': 1.6191362257454953, 'beta_3': 0.616820188787949, 'beta_4': -1.1117186499275222, 'beta_5': 0.7706284557708956, 'beta_6': 0.5801372232581752, 'beta_7': -1.0024232470483967}. Best is trial 0 with value: 24.17402862846094.
[I 2021-07-27 10:12:56,693] Trial 1 finished with value: 23.776819374180324 and parameters: {'optimizer': 'COBYLA', 'reps': 6, 'gamma_0': -0.8416727424417059, 'gamma_1': 2.478454515781327, 'gamma_2': -0.9437238101404901, 'gamma_3': -1.4979622385452278, 'gamma_4': -2.6373952406592096, 'gamma_5': -0.5370591082505314, 'beta_0': -0.8750528950326029, 'beta_1': -1.924319851872841, 'beta_2': -0.790011995330067, 'beta_3': -0.607450099081281, 'beta_4': -2.4411025724618813, 'beta_5': -2.1640009246724503}. Best is trial 1 with value: 23.776819374180324.
In [11]:
print(study.best_trial)
FrozenTrial(number=1, values=[23.776819374180324], datetime_start=datetime.datetime(2021, 7, 27, 10, 12, 44, 465489), datetime_complete=datetime.datetime(2021, 7, 27, 10, 12, 56, 691059), params={'optimizer': 'COBYLA', 'reps': 6, 'gamma_0': -0.8416727424417059, 'gamma_1': 2.478454515781327, 'gamma_2': -0.9437238101404901, 'gamma_3': -1.4979622385452278, 'gamma_4': -2.6373952406592096, 'gamma_5': -0.5370591082505314, 'beta_0': -0.8750528950326029, 'beta_1': -1.924319851872841, 'beta_2': -0.790011995330067, 'beta_3': -0.607450099081281, 'beta_4': -2.4411025724618813, 'beta_5': -2.1640009246724503}, distributions={'optimizer': CategoricalDistribution(choices=('SPSA', 'COBYLA')), 'reps': IntUniformDistribution(high=11, low=1, step=1), 'gamma_0': UniformDistribution(high=3.141592653589793, low=-3.141592653589793), 'gamma_1': UniformDistribution(high=3.141592653589793, low=-3.141592653589793), 'gamma_2': UniformDistribution(high=3.141592653589793, low=-3.141592653589793), 'gamma_3': UniformDistribution(high=3.141592653589793, low=-3.141592653589793), 'gamma_4': UniformDistribution(high=3.141592653589793, low=-3.141592653589793), 'gamma_5': UniformDistribution(high=3.141592653589793, low=-3.141592653589793), 'beta_0': UniformDistribution(high=3.141592653589793, low=-3.141592653589793), 'beta_1': UniformDistribution(high=3.141592653589793, low=-3.141592653589793), 'beta_2': UniformDistribution(high=3.141592653589793, low=-3.141592653589793), 'beta_3': UniformDistribution(high=3.141592653589793, low=-3.141592653589793), 'beta_4': UniformDistribution(high=3.141592653589793, low=-3.141592653589793), 'beta_5': UniformDistribution(high=3.141592653589793, low=-3.141592653589793)}, user_attrs={'best': [-23.776819374180324, optimal function value: -35.0
optimal value: [0. 1. 0. 1. 0.]
status: SUCCESS, array([ 0.15832726,  3.47845452, -0.94372381, -1.49796224, -2.63739524,
       -0.53705911, -0.8750529 , -1.92431985, -0.790012  , -0.6074501 ,
       -2.44110257, -2.16400092])]}, system_attrs={}, intermediate_values={}, trial_id=1, state=TrialState.COMPLETE, value=None)
In [12]:
fig = optuna.visualization.plot_optimization_history(study)
fig.show()
In [13]:
mean, result, optimal_params = study.user_attrs["best"]
optimal_params
Out[13]:
array([ 0.15832726,  3.47845452, -0.94372381, -1.49796224, -2.63739524,
       -0.53705911, -0.8750529 , -1.92431985, -0.790012  , -0.6074501 ,
       -2.44110257, -2.16400092])
In [14]:
max_cut.draw(result)
In [15]:
mean, distribution = max_cut.analyse(result, print_output=True)
plot_histogram(distribution, color='midnightblue', title=f"Expectation Value (Ø: {mean:.3f})")
optimal function value: -35.0
optimal value: [0. 1. 0. 1. 0.]
status: SUCCESS
Mean: -23.776819374180324
00000: value: 0.000, probability: 3.3%
11111: value: 0.000, probability: 3.6%
10010: value: -13.000, probability: 3.6%
01101: value: -13.000, probability: 2.9%
10000: value: -15.000, probability: 2.6%
01111: value: -15.000, probability: 3.5%
00100: value: -16.000, probability: 3.0%
00010: value: -16.000, probability: 2.4%
11101: value: -16.000, probability: 3.1%
11011: value: -16.000, probability: 3.0%
01100: value: -17.000, probability: 3.1%
10011: value: -17.000, probability: 3.2%
01000: value: -19.000, probability: 3.3%
10111: value: -19.000, probability: 3.4%
10110: value: -29.000, probability: 3.3%
01001: value: -29.000, probability: 3.4%
11110: value: -30.000, probability: 3.9%
00001: value: -30.000, probability: 3.3%
10100: value: -31.000, probability: 2.8%
01011: value: -31.000, probability: 3.0%
11100: value: -32.000, probability: 3.2%
11010: value: -32.000, probability: 3.2%
00110: value: -32.000, probability: 2.6%
11001: value: -32.000, probability: 3.1%
00101: value: -32.000, probability: 3.3%
00011: value: -32.000, probability: 3.2%
01110: value: -33.000, probability: 3.0%
10001: value: -33.000, probability: 2.7%
11000: value: -34.000, probability: 3.1%
00111: value: -34.000, probability: 3.2%
01010: value: -35.000, probability: 3.1%
10101: value: -35.000, probability: 2.4%
Out[15]:
In [16]:
import qiskit.tools.jupyter
%qiskit_version_table

Version Information

Qiskit SoftwareVersion
Qiskit0.27.0
Terra0.17.4
Aer0.8.2
Ignis0.6.0
Aqua0.9.2
IBM Q Provider0.14.0
System information
Python3.8.6 (default, Jan 22 2021, 11:41:28) [GCC 8.4.1 20200928 (Red Hat 8.4.1-1)]
OSLinux
CPUs192
Memory (Gb)6046.015735626221
Tue Jul 27 10:12:57 2021 CEST